%load_ext autoreload
%autoreload 2

Hainich with ERA-Interim
Manual fine tuning learning process for presentation 18 Jan 2023
from meteo_imp.kalman.fastai import *
from meteo_imp.kalman.filter import *
from meteo_imp.utils import *
from meteo_imp.data import *
from meteo_imp.data import _def_meteo_vars, units
from fastai.tabular.learner import *
from fastai.learner import *
from fastai.callback.all import *
from fastcore.foundation import L
from meteo_imp.kalman.fastai import show_results
import pandas as pd
import numpy as np
import torch
from pyprojroot import here
from sklearn.decomposition import PCA

# Directory where all plots for the 18 Jan presentation are written
base_dir = here("analysis/presentations/plots_18_jan")
base_dir.mkdir(exist_ok=True)


def save_plot(p, path):
    """Save chart `p` as Vega-Lite JSON under `base_dir` and return the file path.

    `path` is the file stem; the `.vl.json` suffix is appended here.
    """
    f_name = base_dir / (path + ".vl.json")
    f_name.write_text(p.to_json())
    return f_name


reset_seed()
hai = pd.read_parquet(hai_path64)
hai_era = pd.read_parquet(hai_era_path64)
pc = PCA().fit(hai)d0 = hai.iloc[0:1]tr0 = pc.transform(d0)tr0array([[-121.11917652, -7.06844313, 0.87975241]])
t0 = np.random.randn(5,1) * 10
tt = np.hstack([t0, 2 * t0 + np.random.randn(5,1), 3 * t0 + np.random.randn(5,1)])tt = np.vstack([tt, -tt])tt.mean(0)array([1.77635684e-16, 0.00000000e+00, 0.00000000e+00])
def pca(X):
    """Principal component analysis of a 0-centered data matrix.

    Parameters
    ----------
    X : (n, m) array, assumed column-centered (each feature has mean 0).

    Returns
    -------
    X_pca : (n, m) array — X projected onto the principal components,
        ordered by decreasing explained variance.
    eigen_vecs : (m, m) array — eigenvectors of the covariance matrix as
        columns, in the same decreasing-eigenvalue order.
    """
    n, m = X.shape
    assert np.allclose(X.mean(axis=0), np.zeros(m)), "X must be 0-centered"
    # Unbiased covariance matrix
    C = np.dot(X.T, X) / (n - 1)
    # Use `eigh`, not `eig`: C is symmetric, so eigh is guaranteed to return
    # real eigenvalues/eigenvectors (eig can leak tiny imaginary parts and
    # gives no ordering guarantee).
    eigen_vals, eigen_vecs = np.linalg.eigh(C)
    # eigh returns eigenvalues in ascending order; PCA convention (and
    # sklearn's) is descending explained variance.
    order = np.argsort(eigen_vals)[::-1]
    eigen_vecs = eigen_vecs[:, order]
    # Project X onto PC space
    X_pca = np.dot(X, eigen_vecs)
    return X_pca, eigen_vecs
[ 2.00045215e+01, -1.99595583e-02, 1.53745860e+00],
[ 1.33744310e+01, 2.01395206e-02, -3.19819214e-02],
[-8.99057811e+00, 1.24208069e-01, 9.57988123e-01],
[ 3.14616875e+01, -4.88141095e-02, -5.79117681e-01],
[ 9.15947487e+00, 3.03773243e-01, -3.81626348e-01],
[-2.00045215e+01, 1.99595583e-02, -1.53745860e+00],
[-1.33744310e+01, -2.01395206e-02, 3.19819214e-02],
[ 8.99057811e+00, -1.24208069e-01, -9.57988123e-01],
[-3.14616875e+01, 4.88141095e-02, 5.79117681e-01]]),
array([[-0.26545847, -0.95659856, 0.12021234],
[-0.53518386, 0.04249433, -0.84366608],
[-0.80194142, 0.28829401, 0.52323659]]))
sk_pc = PCA(2).fit(tt)tt @ sk_pc.components_.Tarray([[ -9.15947487, 0.38162635],
[ 20.00452148, 1.5374586 ],
[ 13.37443103, -0.03198192],
[ -8.99057811, 0.95798812],
[ 31.46168754, -0.57911768],
[ 9.15947487, -0.38162635],
[-20.00452148, -1.5374586 ],
[-13.37443103, 0.03198192],
[ 8.99057811, -0.95798812],
[-31.46168754, 0.57911768]])
tr = sk_pc.transform(tt)
trarray([[ -9.15947487, 0.38162635],
[ 20.00452148, 1.5374586 ],
[ 13.37443103, -0.03198192],
[ -8.99057811, 0.95798812],
[ 31.46168754, -0.57911768],
[ 9.15947487, -0.38162635],
[-20.00452148, -1.5374586 ],
[-13.37443103, 0.03198192],
[ 8.99057811, -0.95798812],
[-31.46168754, 0.57911768]])
sk_pc.components_.Tarray([[-0.26545847, 0.12021234],
[-0.53518386, -0.84366608],
[-0.80194142, 0.52323659]])
(sk_pc.components_ @ tt.T).Tarray([[ -9.15947487, 0.38162635],
[ 20.00452148, 1.5374586 ],
[ 13.37443103, -0.03198192],
[ -8.99057811, 0.95798812],
[ 31.46168754, -0.57911768],
[ 9.15947487, -0.38162635],
[-20.00452148, -1.5374586 ],
[-13.37443103, 0.03198192],
[ 8.99057811, -0.95798812],
[-31.46168754, 0.57911768]])
tt[0, None]array([[2.76792539, 4.56712931, 7.45746711]])
sk_pc.components_.shape(3, 3)
tt[0].shape(3,)
(sk_pc.components_ @ tt[0])array([-9.15947487, 0.38162635])
tt[0]array([2.76792539, 4.56712931, 7.45746711])
sk_pc.components_.T @ tr[0]array([2.47733634, 4.58003795, 7.54504311])
sk_pc.inverse_transform(tr[0])array([2.47733634, 4.58003795, 7.54504311])
d0.to_numpy()array([[-0.6 , 0. , 0.222]])
hai.iloc[0]TA -0.600
SW_IN 0.000
VPD 0.222
Name: 2000-01-01 00:30:00, dtype: float64
\[ x = y\Lambda \]
pc.components_array([[ 0.01681572, 0.99979324, 0.01143269],
[ 0.93010891, -0.01983747, 0.36674772],
[-0.36689868, -0.00446652, 0.93025018]])
np.linalg.inv(pc.components_)array([[ 0.01681572, 0.93010891, -0.36689868],
[ 0.99979324, -0.01983747, -0.00446652],
[ 0.01143269, 0.36674772, 0.93025018]])
Setup
assuming that the control has the same dimensions of the observations then if we are doing a local slope model we have \(B \in \mathbb{R}^{state \times contr}\): \[ B = \begin{bmatrix} -I & I \\ 0 & 0 \end{bmatrix}\]
from torch import hstack, eye, vstack, ones, zeros, tensor
from functools import partial


def set_dtype(*args, dtype=torch.float64):
    "Return `partial`s of the given factories with `dtype` pre-bound (default float64)."
    return [partial(arg, dtype=dtype) for arg in args]


# Rebind the torch factories so everything below builds float64 tensors
eye, ones, zeros, tensor = set_dtype(eye, ones, zeros, tensor)


def init_smart(n_dim_obs, n_dim_state, df, pca=True):
    """Build a `KalmanFilter` for a local-slope model with a PCA-informed observation map.

    The latent state is `[position; slope]`, each of size `n_dim_state`, so every
    state-space matrix has dimension `2 * n_dim_state`.  Assumes the control has
    the same dimension as the observations (n_dim_obs == n_dim_contr).

    Parameters
    ----------
    n_dim_obs : number of observed (and control) variables
    n_dim_state : number of latent "position" components
    df : dataframe used to fit the PCA giving the state <-> observation maps
    pca : if True use PCA components for the maps; otherwise identity matrices
        (which presumably requires n_dim_obs == n_dim_state — TODO confirm)
    """
    if pca:
        comp = PCA(n_dim_state).fit(df).components_  # (n_dim_state, n_dim_obs)
        obs_matrix = tensor(comp.T)    # maps state -> observations
        contr_matrix = tensor(comp)    # maps observations -> state
    else:
        obs_matrix, contr_matrix = eye(n_dim_obs), eye(n_dim_obs)
    return KalmanFilter(
        # Local slope: position += slope, slope unchanged
        trans_matrix = vstack([hstack([eye(n_dim_state), eye(n_dim_state)]),
                               hstack([zeros(n_dim_state, n_dim_state), eye(n_dim_state)])]),
        trans_off = zeros(n_dim_state * 2),
        trans_cov = eye(n_dim_state * 2) * .1,
        # Observations read only the "position" half of the state
        obs_matrix = hstack([obs_matrix, zeros(n_dim_obs, n_dim_state)]),
        obs_off = zeros(n_dim_obs),
        obs_cov = eye(n_dim_obs) * .01,
        # B = [[-C, C], [0, 0]] as described above: the control drives the
        # position components only
        contr_matrix = vstack([hstack([-contr_matrix, contr_matrix]),
                               hstack([zeros(n_dim_state, n_dim_obs), zeros(n_dim_state, n_dim_obs)])]),
        init_state_mean = zeros(n_dim_state * 2),
        init_state_cov = eye(n_dim_state * 2) * 3,
    )
[0., 1., 0., 1.]])
init_smart(3,2, hai)| state | x_0 | x_1 | x_2 | x_3 |
|---|---|---|---|---|
| x_0 | 1.0000 | 0.0000 | 1.0000 | 0.0000 |
| x_1 | 0.0000 | 1.0000 | 0.0000 | 1.0000 |
| x_2 | 0.0000 | 0.0000 | 1.0000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 1.0000 |
trans cov (Q)
| state | x_0 | x_1 | x_2 | x_3 |
|---|---|---|---|---|
| x_0 | 0.1000 | 0.0000 | 0.0000 | 0.0000 |
| x_1 | 0.0000 | 0.1000 | 0.0000 | 0.0000 |
| x_2 | 0.0000 | 0.0000 | 0.1000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 0.1000 |
trans off
| state | offset |
|---|---|
| x_0 | 0.0000 |
| x_1 | 0.0000 |
| x_2 | 0.0000 |
| x_3 | 0.0000 |
obs matrix (H)
| variable | x_0 | x_1 | x_2 | x_3 |
|---|---|---|---|---|
| y_0 | 0.0168 | 0.9301 | 0.0000 | 0.0000 |
| y_1 | 0.9998 | -0.0198 | 0.0000 | 0.0000 |
| y_2 | 0.0114 | 0.3667 | 0.0000 | 0.0000 |
obs cov (R)
| variable | y_0 | y_1 | y_2 |
|---|---|---|---|
| y_0 | 0.0100 | 0.0000 | 0.0000 |
| y_1 | 0.0000 | 0.0100 | 0.0000 |
| y_2 | 0.0000 | 0.0000 | 0.0100 |
obs off
| variable | offset |
|---|---|
| y_0 | 0.0000 |
| y_1 | 0.0000 |
| y_2 | 0.0000 |
contr matrix (B)
| state | c_0 | c_1 | c_2 | c_3 | c_4 | c_5 |
|---|---|---|---|---|---|---|
| x_0 | -0.0168 | -0.9998 | -0.0114 | 0.0168 | 0.9998 | 0.0114 |
| x_1 | -0.9301 | 0.0198 | -0.3667 | 0.9301 | -0.0198 | 0.3667 |
| x_2 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
init state mean
| state | mean |
|---|---|
| x_0 | 0.0000 |
| x_1 | 0.0000 |
| x_2 | 0.0000 |
| x_3 | 0.0000 |
init state cov
| state | x_0 | x_1 | x_2 | x_3 |
|---|---|---|---|---|
| x_0 | 3.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_1 | 0.0000 | 3.0000 | 0.0000 | 0.0000 |
| x_2 | 0.0000 | 0.0000 | 3.0000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 3.0000 |
class PersistentRecorder(Callback):
    "Accumulate `Recorder` stats (`lrs`, `iters`, `losses`, `values`) across multiple `fit` calls."
    order = 70  # run after Recorder so its per-batch stats are already updated
    name = "per_recorder"
    attrs = ['lrs', 'iters', 'losses', 'values']

    def before_fit(self):
        "Prepare state for training"
        for attr in self.attrs:
            if not hasattr(self.per_recorder, attr):
                setattr(self.per_recorder, attr, [])
        # fastai's Recorder resets its lists at the start of every fit, so
        # snapshot what was accumulated during previous fits; `after_batch`
        # re-prepends it.  (The original code assigned the recorder's list
        # directly, discarding earlier fits' history.)
        self._prev = {attr: list(getattr(self.per_recorder, attr)) for attr in self.attrs}

    def after_batch(self):
        for attr in self.attrs:
            setattr(self.per_recorder, attr, self._prev[attr] + getattr(self.recorder, attr))


models = []
dls = imp_dataloader(hai, hai_era, var_sel=['TA', 'SW_IN', 'VPD'], block_len=200,
                     gap_len=10, bs=20, control_lags=[1], n_rep=2)
len(dls.valid.items)
items = [dls.valid.items[i] for i in [10, 50, 100, 200, 300, 400]]


def train_show_save(learn, n_iter, lr):
    """Fit `learn` for `n_iter` epochs at learning rate `lr`, snapshot the weights
    into the global `models` list, plot the loss curve and show predictions on a
    fixed set of validation items."""
    learn.fit(n_iter, lr)
    # `state_dict().copy()` is a *shallow* copy: the tensors it holds would keep
    # being updated by later training, so every "snapshot" would end up equal to
    # the final weights.  Clone each tensor to freeze the snapshot.
    models.append({k: v.detach().clone() for k, v in learn.model.state_dict().items()})
    learn.recorder.plot_loss()
    items = [learn.dls.valid.items[i] for i in [10, 50, 100, 200, 300, 400]]
    return show_results(learn, items=items, control=hai_control)
model = init_smart(3, 3, hai, pca=False).cuda()
model.var_names = _def_meteo_vars.values()
# NOTE(review): original read `loss = loss_func=KalmanLoss(...)` — a chained
# assignment typo that also created an unused global `loss_func`.
loss = KalmanLoss(only_gap=False)
learn = Learner(dls, model, loss, cbs=[Float64Callback], metrics=imp_metrics)
learn.model.use_smooth = True
show_results(learn, items=items, control=hai_control)
train_show_save(learn, 1, 1e-3)
|---|---|---|---|---|---|---|---|
| 0 | -87.370314 | -96.212915 | 0.069196 | 0.210351 | 0.946861 | -250571814513532732431918956544.000000 | 02:37 |
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 1.1489 | 0.0042 | 0.0120 | 0.8404 | -0.0794 | -0.1127 |
| x_1 | -0.0954 | 1.1464 | -0.0546 | 0.1177 | 0.8560 | 0.0599 |
| x_2 | -0.0225 | 0.0420 | 1.1357 | -0.0717 | -0.1190 | 0.8475 |
| x_3 | 0.1201 | 0.0661 | 0.1170 | 1.1840 | -0.1150 | -0.1073 |
| x_4 | 0.0770 | 0.1299 | 0.0068 | -0.1847 | 1.0987 | -0.1500 |
| x_5 | 0.1084 | 0.0358 | 0.1133 | -0.1809 | -0.0281 | 1.1533 |
trans cov (Q)
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 0.0341 | 0.0493 | 0.0361 | -0.0008 | -0.0312 | 0.0026 |
| x_1 | 0.0493 | 0.1125 | 0.0531 | 0.0475 | 0.0047 | 0.0509 |
| x_2 | 0.0361 | 0.0531 | 0.0399 | 0.0001 | -0.0327 | 0.0058 |
| x_3 | -0.0008 | 0.0475 | 0.0001 | 0.0608 | 0.0614 | 0.0585 |
| x_4 | -0.0312 | 0.0047 | -0.0327 | 0.0614 | 0.0901 | 0.0554 |
| x_5 | 0.0026 | 0.0509 | 0.0058 | 0.0585 | 0.0554 | 0.0594 |
trans off
| state | offset |
|---|---|
| x_0 | 0.0008 |
| x_1 | -0.0083 |
| x_2 | 0.0067 |
| x_3 | 0.0023 |
| x_4 | -0.0007 |
| x_5 | 0.0005 |
obs matrix (H)
| variable | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| y_0 | 0.8238 | -0.1897 | -0.1014 | -0.0841 | 0.1914 | 0.1286 |
| y_1 | 0.0814 | 0.8353 | 0.1048 | 0.1040 | 0.0127 | 0.0689 |
| y_2 | -0.0742 | -0.1502 | 0.8296 | 0.1282 | 0.1811 | -0.0746 |
obs cov (R)
| variable | y_0 | y_1 | y_2 |
|---|---|---|---|
| y_0 | 0.0084 | 0.0000 | 0.0000 |
| y_1 | 0.0000 | 0.0084 | 0.0000 |
| y_2 | 0.0000 | 0.0000 | 0.0084 |
obs off
| variable | offset |
|---|---|
| y_0 | 0.0017 |
| y_1 | -0.0046 |
| y_2 | 0.0026 |
contr matrix (B)
| state | c_0 | c_1 | c_2 | c_3 | c_4 | c_5 |
|---|---|---|---|---|---|---|
| x_0 | -1.0290 | -0.0485 | -0.0022 | 0.9297 | 0.0792 | -0.0247 |
| x_1 | -0.0267 | -0.8778 | -0.0233 | 0.0113 | 0.9099 | 0.0261 |
| x_2 | -0.0018 | -0.0592 | -1.0044 | -0.0310 | 0.0674 | 0.9204 |
| x_3 | -0.0503 | -0.0312 | -0.0556 | -0.0440 | -0.0529 | -0.0587 |
| x_4 | -0.0375 | -0.0537 | -0.0246 | -0.0412 | -0.0352 | -0.0338 |
| x_5 | -0.0653 | -0.0179 | -0.0575 | -0.0655 | -0.0307 | -0.0449 |
init state mean
| state | mean |
|---|---|
| x_0 | -0.0393 |
| x_1 | 0.0044 |
| x_2 | -0.0092 |
| x_3 | 0.0207 |
| x_4 | -0.0094 |
| x_5 | -0.0021 |
init state cov
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 2.5500 | -0.1402 | -0.0082 | 0.5233 | 0.3011 | 0.0882 |
| x_1 | -0.1402 | 2.4578 | 0.1502 | 0.0355 | 0.5714 | -0.0459 |
| x_2 | -0.0082 | 0.1502 | 2.5045 | 0.2739 | 0.0634 | 0.5195 |
| x_3 | 0.5233 | 0.0355 | 0.2739 | 2.3814 | -0.2201 | -0.1842 |
| x_4 | 0.3011 | 0.5714 | 0.0634 | -0.2201 | 2.3880 | -0.1162 |
| x_5 | 0.0882 | -0.0459 | 0.5195 | -0.1842 | -0.1162 | 2.3874 |

train_show_save(learn, 1, 1e-3)/home/simone/anaconda3/envs/data-science/lib/python3.10/site-packages/fastai/callback/core.py:69: UserWarning: You are shadowing an attribute (__class__) that exists in the learner. Use `self.learn.__class__` to avoid this
warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
| epoch | train_loss | valid_loss | rmse | rmse_gap | r2 | r2_gap | time |
|---|---|---|---|---|---|---|---|
| 0 | -108.023250 | -113.113034 | 0.073622 | 0.198386 | 0.956828 | -105578612000578019271909572608.000000 | 02:37 |
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 1.1440 | -0.0057 | 0.0107 | 0.8262 | -0.0845 | -0.1205 |
| x_1 | -0.0534 | 1.2089 | -0.0223 | 0.0948 | 0.7937 | 0.0494 |
| x_2 | -0.0289 | 0.0176 | 1.1417 | -0.0702 | -0.1154 | 0.8351 |
| x_3 | 0.1277 | 0.0589 | 0.1074 | 1.1787 | -0.1203 | -0.1024 |
| x_4 | 0.0654 | 0.1560 | 0.0043 | -0.1781 | 1.1087 | -0.1415 |
| x_5 | 0.0959 | 0.0439 | 0.1197 | -0.1725 | -0.0340 | 1.1496 |
trans cov (Q)
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 0.0207 | 0.0254 | 0.0210 | -0.0052 | -0.0232 | -0.0033 |
| x_1 | 0.0254 | 0.0548 | 0.0255 | 0.0259 | 0.0034 | 0.0281 |
| x_2 | 0.0210 | 0.0255 | 0.0222 | -0.0056 | -0.0241 | -0.0023 |
| x_3 | -0.0052 | 0.0259 | -0.0056 | 0.0473 | 0.0504 | 0.0465 |
| x_4 | -0.0232 | 0.0034 | -0.0241 | 0.0504 | 0.0695 | 0.0473 |
| x_5 | -0.0033 | 0.0281 | -0.0023 | 0.0465 | 0.0473 | 0.0477 |
trans off
| state | offset |
|---|---|
| x_0 | 0.0004 |
| x_1 | -0.0055 |
| x_2 | 0.0048 |
| x_3 | 0.0027 |
| x_4 | -0.0033 |
| x_5 | 0.0021 |
obs matrix (H)
| variable | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| y_0 | 0.7902 | -0.2032 | -0.1217 | -0.0986 | 0.2164 | 0.1090 |
| y_1 | 0.0160 | 0.7417 | 0.0555 | 0.1016 | 0.1041 | 0.0374 |
| y_2 | -0.1103 | -0.1594 | 0.7916 | 0.1185 | 0.2096 | -0.0949 |
obs cov (R)
| variable | y_0 | y_1 | y_2 |
|---|---|---|---|
| y_0 | 0.0077 | 0.0000 | 0.0000 |
| y_1 | 0.0000 | 0.0077 | 0.0000 |
| y_2 | 0.0000 | 0.0000 | 0.0077 |
obs off
| variable | offset |
|---|---|
| y_0 | -0.0009 |
| y_1 | -0.0053 |
| y_2 | -0.0021 |
contr matrix (B)
| state | c_0 | c_1 | c_2 | c_3 | c_4 | c_5 |
|---|---|---|---|---|---|---|
| x_0 | -1.0277 | -0.0595 | -0.0019 | 0.9181 | 0.0838 | -0.0194 |
| x_1 | -0.0414 | -0.8661 | -0.0323 | 0.0110 | 0.8741 | 0.0262 |
| x_2 | -0.0029 | -0.0647 | -1.0045 | -0.0319 | 0.0757 | 0.8957 |
| x_3 | -0.0678 | -0.0368 | -0.0564 | -0.0559 | -0.0610 | -0.0639 |
| x_4 | -0.0374 | -0.0690 | -0.0209 | -0.0485 | -0.0406 | -0.0354 |
| x_5 | -0.0591 | -0.0267 | -0.0753 | -0.0633 | -0.0407 | -0.0555 |
init state mean
| state | mean |
|---|---|
| x_0 | -0.0396 |
| x_1 | -0.0009 |
| x_2 | -0.0049 |
| x_3 | 0.0153 |
| x_4 | -0.0139 |
| x_5 | -0.0094 |
init state cov
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 2.6364 | -0.1456 | -0.0020 | 0.5695 | 0.3986 | 0.1628 |
| x_1 | -0.1456 | 2.3292 | 0.1064 | 0.2074 | 0.7591 | 0.1475 |
| x_2 | -0.0020 | 0.1064 | 2.4205 | 0.3489 | 0.1488 | 0.6143 |
| x_3 | 0.5695 | 0.2074 | 0.3489 | 2.2351 | -0.4355 | -0.3277 |
| x_4 | 0.3986 | 0.7591 | 0.1488 | -0.4355 | 2.1625 | -0.3445 |
| x_5 | 0.1628 | 0.1475 | 0.6143 | -0.3277 | -0.3445 | 2.2518 |

# learn.save("17_jan_all_gaps")Path('models/17_jan_all_gaps.pth')
train_show_save(learn, 2, 1e-3)train_show_save(learn, 2, 1e-3)| epoch | train_loss | valid_loss | rmse | rmse_gap | r2 | r2_gap | time |
|---|---|---|---|---|---|---|---|
| 0 | -149.288297 | -152.804403 | 0.072760 | 0.179190 | 0.956827 | -61340364986143763995601928192.000000 | 02:35 |
| 1 | -160.099637 | -165.002097 | 0.071263 | 0.181697 | 0.963587 | -48387608947773502947627892736.000000 | 02:37 |

# learn.save("17_jan_all_gaps_final")Path('models/17_jan_all_gaps_final.pth')
Results
learn.load("17_jan_all_gaps_final");

# Combined view of three gaps, then one plot per gap sized for the slides
p = show_results(learn, control=hai_control, items=[items[i] for i in [0, 5, 4]],
                 show_metric=False, units=list(units.values()))
p
p0 = show_results(learn, control=hai_control, items=[items[i] for i in [0]], n_cols=2,
                  show_metric=False, props={'width': 350, 'height': 250},
                  units=list(units.values()))
p0
p1 = show_results(learn, control=hai_control, items=[items[i] for i in [5]], n_cols=2,
                  show_metric=False, props={'width': 350, 'height': 250},
                  units=list(units.values()))
p1
p2 = show_results(learn, control=hai_control, items=[items[i] for i in [4]], n_cols=2,
                  show_metric=False, props={'width': 350, 'height': 250},
                  units=list(units.values()))
p2
save_plot(p0, "results_gap_all_vars_g0")
save_plot(p1, "results_gap_all_vars_g1")
save_plot(p2, "results_gap_all_vars_g2")
interact_results(learn, hai, hai_era)/home/simone/anaconda3/envs/data-science/lib/python3.10/site-packages/fastai/callback/core.py:69: UserWarning: You are shadowing an attribute (__class__) that exists in the learner. Use `self.learn.__class__` to avoid this
warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
<function meteo_imp.kalman.fastai.interact_results.<locals>._inner(gap_len, items_idx, control_lags, block_len, shift, **var_names)>
Gap in only 1 variable
dls2 = imp_dataloader(hai, hai_era, var_sel=['TA'], block_len=200, gap_len=10,
                      bs=20, control_lags=[1], n_rep=2)
model2 = init_smart(3, 3, hai, pca=True).cuda()
model2
|---|---|---|---|---|---|---|
| x_0 | 1.0000 | 0.0000 | 0.0000 | 1.0000 | 0.0000 | 0.0000 |
| x_1 | 0.0000 | 1.0000 | 0.0000 | 0.0000 | 1.0000 | 0.0000 |
| x_2 | 0.0000 | 0.0000 | 1.0000 | 0.0000 | 0.0000 | 1.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 1.0000 | 0.0000 | 0.0000 |
| x_4 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 1.0000 | 0.0000 |
| x_5 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 1.0000 |
trans cov (Q)
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_1 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_2 | 0.0000 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 0.1000 | 0.0000 | 0.0000 |
| x_4 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.1000 | 0.0000 |
| x_5 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.1000 |
trans off
| state | offset |
|---|---|
| x_0 | 0.0000 |
| x_1 | 0.0000 |
| x_2 | 0.0000 |
| x_3 | 0.0000 |
| x_4 | 0.0000 |
| x_5 | 0.0000 |
obs matrix (H)
| variable | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| y_0 | 0.0168 | 0.9301 | -0.3669 | 0.0000 | 0.0000 | 0.0000 |
| y_1 | 0.9998 | -0.0198 | -0.0045 | 0.0000 | 0.0000 | 0.0000 |
| y_2 | 0.0114 | 0.3667 | 0.9303 | 0.0000 | 0.0000 | 0.0000 |
obs cov (R)
| variable | y_0 | y_1 | y_2 |
|---|---|---|---|
| y_0 | 0.0100 | 0.0000 | 0.0000 |
| y_1 | 0.0000 | 0.0100 | 0.0000 |
| y_2 | 0.0000 | 0.0000 | 0.0100 |
obs off
| variable | offset |
|---|---|
| y_0 | 0.0000 |
| y_1 | 0.0000 |
| y_2 | 0.0000 |
contr matrix (B)
| state | c_0 | c_1 | c_2 | c_3 | c_4 | c_5 |
|---|---|---|---|---|---|---|
| x_0 | -0.0168 | -0.9998 | -0.0114 | 0.0168 | 0.9998 | 0.0114 |
| x_1 | -0.9301 | 0.0198 | -0.3667 | 0.9301 | -0.0198 | 0.3667 |
| x_2 | 0.3669 | 0.0045 | -0.9303 | -0.3669 | -0.0045 | 0.9303 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_4 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_5 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
init state mean
| state | mean |
|---|---|
| x_0 | 0.0000 |
| x_1 | 0.0000 |
| x_2 | 0.0000 |
| x_3 | 0.0000 |
| x_4 | 0.0000 |
| x_5 | 0.0000 |
init state cov
| state | x_0 | x_1 | x_2 | x_3 | x_4 | x_5 |
|---|---|---|---|---|---|---|
| x_0 | 3.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_1 | 0.0000 | 3.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_2 | 0.0000 | 0.0000 | 3.0000 | 0.0000 | 0.0000 | 0.0000 |
| x_3 | 0.0000 | 0.0000 | 0.0000 | 3.0000 | 0.0000 | 0.0000 |
| x_4 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 3.0000 | 0.0000 |
| x_5 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 3.0000 |
# NOTE(review): original read `loss2 = loss_func=KalmanLoss(...)` — a chained
# assignment typo that rebound the unused global `loss_func`; only `loss2` is used.
loss2 = KalmanLoss(only_gap=False)
learn2 = Learner(dls2, model2, loss2, cbs=[Float64Callback], metrics=imp_metrics)
train_show_save(learn2, 1, 1e-3)
|---|---|---|---|---|---|---|---|
| 0 | -60.562467 | -78.105121 | 0.041354 | 0.095727 | 0.974146 | -10.220484 | 03:00 |

train_show_save(learn2, 3, 1e-3)| epoch | train_loss | valid_loss | rmse | rmse_gap | r2 | r2_gap | time |
|---|---|---|---|---|---|---|---|
| 0 | -99.062288 | -107.225395 | 0.043633 | 0.077547 | 0.955371 | -5.651017 | 02:56 |
| 1 | -115.215551 | -122.628456 | 0.048706 | 0.075058 | 0.960905 | -4.600422 | 02:52 |
| 2 | -130.109601 | -137.303035 | 0.054807 | 0.069069 | 0.966896 | -3.014253 | 02:58 |

learn2.loss_func = KalmanLoss(only_gap=True)train_show_save(learn2, 1, .5e-3)| epoch | train_loss | valid_loss | rmse | rmse_gap | r2 | r2_gap | time |
|---|---|---|---|---|---|---|---|
| 0 | -2.358371 | -2.383616 | 0.058387 | 0.077012 | 0.958465 | -4.192017 | 01:52 |

train_show_save(learn2, 1, 2e-4)| epoch | train_loss | valid_loss | rmse | rmse_gap | r2 | r2_gap | time |
|---|---|---|---|---|---|---|---|
| 0 | -2.387042 | -2.445100 | 0.057919 | 0.073482 | 0.956432 | -3.904405 | 01:51 |
